In [1]:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#   LEARN FCN01 from FCN02
#

from __future__ import print_function
import argparse
import os

import numpy as np
import pickle
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Concatenate
from keras.layers import merge
from keras.optimizers import Adam, SGD, RMSprop
from keras.preprocessing.image import list_pictures, array_to_img

from image_ext import list_pictures_in_multidir, load_imgs_asarray, img_dice_coeff
from create_fcn import create_fcn01, create_fcn02

np.random.seed(2016)
/home/nakazawa_atsushi/anaconda3/envs/py3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
Using TensorFlow backend.
/home/nakazawa_atsushi/anaconda3/envs/py3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6
  return f(*args, **kwds)
In [2]:
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between two tensors, smoothed by +1.

    Both inputs are flattened, so any shape works as long as the two
    tensors have the same number of elements. Returns a scalar tensor
    in (0, 1]; the +1 terms avoid division by zero on empty masks.
    """
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    union = K.sum(truth) + K.sum(pred)
    return (2. * overlap + 1) / (union + 1)

def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient, so that minimizing the loss maximizes overlap."""
    coef = dice_coef(y_true, y_pred)
    return -coef
In [3]:
def load_fnames(paths):
    """Read a newline-separated list of file names.

    Parameters
    ----------
    paths : str
        Path to a text file with one file name per line.

    Returns
    -------
    list of str
        The non-terminal lines of the file.

    Fixes over the original: the file is closed via a context manager even
    on error, and the trailing empty entry is removed only when it is
    actually empty — the old unconditional `del lines[-1]` silently dropped
    the last real name when the file did not end with a newline.
    """
    with open(paths) as f:
        data = f.read()
    lines = data.split('\n')
    # Files written with a trailing newline produce a final empty string.
    if lines and lines[-1] == '':
        lines.pop()
    return lines
In [4]:
def make_fnames(fnames, fpath, fpath_mask, mask_ext):
    """Build parallel lists of image and mask paths for the given file names.

    Parameters
    ----------
    fnames : list of str
        Bare file names.
    fpath : str
        Directory holding the images.
    fpath_mask : str
        Directory holding the masks.
    mask_ext : str
        Prefix prepended to each name to form the mask file name.

    Returns
    -------
    list
        ``[image_paths, mask_paths]`` — two lists aligned index-by-index.
    """
    fnames_img = [fpath + '/' + name for name in fnames]
    fnames_mask = [fpath_mask + '/' + mask_ext + name for name in fnames]
    return [fnames_img, fnames_mask]
In [5]:
def get_center(im):
    """Return the (slightly offset) centroid of the nonzero pixels of `im`.

    Parameters
    ----------
    im : numpy.ndarray
        2-D array; any value > 0 counts as foreground.

    Returns
    -------
    list
        ``[(sum_x + 1) / (npix + 1), (sum_y + 1) / (npix + 1)]`` where sum_x /
        sum_y are the sums of the x / y coordinates of the foreground pixels
        and npix is their count. The +1 terms avoid division by zero when the
        mask is empty (same formula as the original).

    Fixes over the original: no longer mutates the caller's array in place
    (`im[im>0] = 1` clobbered the input), and the per-column/per-row Python
    loops are replaced by an equivalent vectorized computation.
    """
    mask = im > 0
    npix = mask.sum()
    ys, xs = np.nonzero(mask)  # row (y) and column (x) indices of foreground
    xval = xs.sum()
    yval = ys.sum()
    return [(xval + 1) / (npix + 1), (yval + 1) / (npix + 1)]
In [6]:
#
#  MAIN STARTS FROM HERE
#
if __name__ == '__main__':

    # --- configuration -------------------------------------------------
    target_size = (224, 224)                       # input image size (H, W)
    dpath_this = './'
    dname_checkpoints = 'checkpoints_fcn01_LAB'    # where fcn01 weights/stats are saved
    dname_checkpoints_fcn02 = 'checkpoints_fcn02'  # pretrained fcn02 weights live here
    dname_outputs = 'outputs'
    fname_architecture = 'architecture.json'
    fname_weights = "model_weights_{epoch:02d}.h5"
    fname_stats = 'stats01.npz'                    # per-channel mean/std of training set
    dim_ordering = 'channels_first'
    fname_history = "history.pkl"

    # Run mode: LEARN or TEST or SHOW_HISTORY
    mode = "LEARN"
    #mode = "SHOW_HISTORY"
    #mode = "TEST"

    # Build both models; fcn01 is later initialized from fcn02's weights.
    print('creating model fcn01 and fcn02...')
    model_fcn02 = create_fcn02(target_size)
    model_fcn01 = create_fcn01(target_size)

    # Ensure the checkpoint directory exists (idiomatic replacement for the
    # original `os.path.exists(...) == 0` truthiness check).
    os.makedirs(dname_checkpoints, exist_ok=True)
creating model fcn01 and fcn02...
In [7]:
#
#   LEARNING MODE
#
if mode == "LEARN":
    # Read training data: image file names come from the list file; images
    # live under data.LAB/img and their masks under data.LAB/mask, with the
    # mask file name prefixed by 'OperatorA_'.
    fnames = load_fnames('data/list_train_01.txt')
    [fpaths_xs_train,fpaths_ys_train] = make_fnames(fnames,'data.LAB/img','data.LAB/mask','OperatorA_')

    X_train = load_imgs_asarray(fpaths_xs_train, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    Y_train = load_imgs_asarray(fpaths_ys_train, grayscale=True, target_size=target_size,
                                dim_ordering=dim_ordering) 

    # Read validation data the same way.
    fnames = load_fnames('data/list_valid_01.txt')
    [fpaths_xs_valid,fpaths_ys_valid] = make_fnames(fnames,'data.LAB/img','data.LAB/mask','OperatorA_')

    X_valid = load_imgs_asarray(fpaths_xs_valid, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    Y_valid = load_imgs_asarray(fpaths_ys_valid, grayscale=True, target_size=target_size,
                                dim_ordering=dim_ordering)     

    print('==> ' + str(len(X_train)) + ' training images loaded')
    print('==> ' + str(len(Y_train)) + ' training masks loaded')
    print('==> ' + str(len(X_valid)) + ' validation images loaded')
    print('==> ' + str(len(Y_valid)) + ' validation masks loaded')

    # Preprocessing: per-channel mean/std computed over the training set only
    # (axes 0, 2, 3 = sample, height, width under channels_first layout).
    print('computing mean and standard deviation...')
    mean = np.mean(X_train, axis=(0, 2, 3))
    std = np.std(X_train, axis=(0, 2, 3))
    print('==> mean: ' + str(mean))
    print('==> std : ' + str(std))

    # Persist the stats so inference/TEST mode can apply identical normalization.
    print('saving mean and standard deviation to ' + fname_stats + '...')
    stats = {'mean': mean, 'std': std}
    np.savez(dname_checkpoints + '/' + fname_stats, **stats)
    print('==> done')

    # Normalize each input channel in place with the training statistics;
    # scale masks from [0, 255] to [0, 1].
    # NOTE(review): assumes load_imgs_asarray returns float arrays — an integer
    # dtype would make the in-place division lossy; confirm against image_ext.
    print('globally normalizing data...')
    for i in range(3):
        X_train[:, i] = (X_train[:, i] - mean[i]) / std[i]
        X_valid[:, i] = (X_valid[:, i] - mean[i]) / std[i]
    Y_train /= 255
    Y_valid /= 255
    print('==> done')
==> 1452 training images loaded
==> 1452 training masks loaded
==> 527 validation images loaded
==> 527 validation masks loaded
computing mean and standard deviation...
==> mean: [143.01152 142.41399 107.15788]
==> std : [ 9.874445  8.032658 48.13504 ]
saving mean and standard deviation to stats01.npz...
==> done
globally normalizing data...
==> done
In [8]:
    # Load the pretrained fcn02 weights (epoch 200) into model_fcn02.
    epoch = 200
    # NOTE(review): this overwrites the fname_weights template defined earlier;
    # a later cell re-assigns the template before creating the checkpointer.
    fname_weights = 'model_weights_%02d.h5'%(epoch)
    fpath_weights_fcn02 = os.path.join(dname_checkpoints_fcn02, fname_weights)
    model_fcn02.load_weights(fpath_weights_fcn02)
    print('==> done')

    # load weights from Learned U-NET:
    # these layer names are shared between fcn01 and fcn02, so their weights
    # can be copied one-to-one.
    layer_names = ['conv1_1','conv1_2','conv2_1','conv2_2',
                'up1_1', 'up1_2', 'up2_1', 'up2_2', 'conv_fin']
    
    print('copying layer weights')
    for name in layer_names:
        print(name)
        model_fcn01.get_layer(name).set_weights(model_fcn02.get_layer(name).get_weights())
        # Keep the copied layers trainable: fcn02 weights are only used as
        # an initialization, not frozen features.
        model_fcn01.get_layer(name).trainable = True
    
==> done
copying layer weights
conv1_1
conv1_2
conv2_1
conv2_2
up1_1
up1_2
up2_1
up2_2
conv_fin
In [8]:
    # Define the loss function and optimizer. Dice loss is the negated Dice
    # coefficient, so training maximizes mask overlap.
    adam = Adam(lr=1e-5)
    # Alternative optimizers kept for reference (the original created `sgd`
    # but never used it — commented out like rmsprop to remove the dead local):
    #sgd = SGD(lr=0.01, decay=1e-6, momentum=0.1, nesterov=True)
    #rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
    model_fcn01.compile(optimizer=adam, loss=dice_coef_loss, metrics=[dice_coef])

    # Make sure the directory for architecture/weights exists.
    dpath_checkpoints = os.path.join(dpath_this, dname_checkpoints)
    if not os.path.isdir(dpath_checkpoints):
        os.mkdir(dpath_checkpoints)

    # Checkpoint object that saves the weights after every epoch
    # (save_best_only=False keeps all epochs, not just the best val_loss).
    fname_weights = "model_weights_{epoch:02d}.h5"  # restore the epoch template
    fpath_weights = os.path.join(dpath_checkpoints, fname_weights)
    checkpointer = ModelCheckpoint(filepath=fpath_weights, save_best_only=False)      
In [9]:
    # Start training fcn01; the ModelCheckpoint callback writes the weights
    # to disk after every epoch.
    print('start training...')
    history = model_fcn01.fit(X_train, Y_train, batch_size=64, epochs=400, verbose=1,
                  shuffle=True, validation_data=(X_valid, Y_valid), callbacks=[checkpointer])
    # NOTE(review): `history` is never pickled to fname_history in the visible
    # cells — confirm whether a later cell saves it, otherwise the training
    # curves are lost when the kernel dies.
start training...
Train on 1452 samples, validate on 527 samples
Epoch 1/400
1452/1452 [==============================] - 83s 57ms/step - loss: -0.0159 - dice_coef: 0.0159 - val_loss: -0.0193 - val_dice_coef: 0.0193
Epoch 2/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.0170 - dice_coef: 0.0170 - val_loss: -0.0213 - val_dice_coef: 0.0213
Epoch 3/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.0200 - dice_coef: 0.0200 - val_loss: -0.0262 - val_dice_coef: 0.0262
Epoch 4/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.0298 - dice_coef: 0.0298 - val_loss: -0.0597 - val_dice_coef: 0.0597
Epoch 5/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.1203 - dice_coef: 0.1203 - val_loss: -0.2098 - val_dice_coef: 0.2098
Epoch 6/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.2116 - dice_coef: 0.2116 - val_loss: -0.1787 - val_dice_coef: 0.1787
Epoch 7/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.3303 - dice_coef: 0.3303 - val_loss: -0.4126 - val_dice_coef: 0.4126
Epoch 8/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.4758 - dice_coef: 0.4758 - val_loss: -0.5652 - val_dice_coef: 0.5652
Epoch 9/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.5785 - dice_coef: 0.5785 - val_loss: -0.6284 - val_dice_coef: 0.6284
Epoch 10/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.6173 - dice_coef: 0.6173 - val_loss: -0.5027 - val_dice_coef: 0.5027
Epoch 11/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.5732 - dice_coef: 0.5732 - val_loss: -0.6063 - val_dice_coef: 0.6063
Epoch 12/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.6555 - dice_coef: 0.6555 - val_loss: -0.6879 - val_dice_coef: 0.6879
Epoch 13/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.6778 - dice_coef: 0.6778 - val_loss: -0.6891 - val_dice_coef: 0.6891
Epoch 14/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.6908 - dice_coef: 0.6908 - val_loss: -0.7055 - val_dice_coef: 0.7055
Epoch 15/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.6816 - dice_coef: 0.6816 - val_loss: -0.6636 - val_dice_coef: 0.6636
Epoch 16/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.6982 - dice_coef: 0.6982 - val_loss: -0.7186 - val_dice_coef: 0.7186
Epoch 17/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7038 - dice_coef: 0.7038 - val_loss: -0.7106 - val_dice_coef: 0.7106
Epoch 18/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7123 - dice_coef: 0.7123 - val_loss: -0.7277 - val_dice_coef: 0.7277
Epoch 19/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7049 - dice_coef: 0.7049 - val_loss: -0.6547 - val_dice_coef: 0.6547
Epoch 20/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.6867 - dice_coef: 0.6867 - val_loss: -0.7259 - val_dice_coef: 0.7259
Epoch 21/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7277 - dice_coef: 0.7277 - val_loss: -0.7045 - val_dice_coef: 0.7045
Epoch 22/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7346 - dice_coef: 0.7346 - val_loss: -0.7443 - val_dice_coef: 0.7443
Epoch 23/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7254 - dice_coef: 0.7254 - val_loss: -0.7394 - val_dice_coef: 0.7394
Epoch 24/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7137 - dice_coef: 0.7137 - val_loss: -0.7465 - val_dice_coef: 0.7465
Epoch 25/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7418 - dice_coef: 0.7418 - val_loss: -0.7494 - val_dice_coef: 0.7494
Epoch 26/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7534 - dice_coef: 0.7534 - val_loss: -0.7528 - val_dice_coef: 0.7528
Epoch 27/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7533 - dice_coef: 0.7533 - val_loss: -0.7604 - val_dice_coef: 0.7604
Epoch 28/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7514 - dice_coef: 0.7514 - val_loss: -0.7397 - val_dice_coef: 0.7397
Epoch 29/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7567 - dice_coef: 0.7567 - val_loss: -0.7478 - val_dice_coef: 0.7478
Epoch 30/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7602 - dice_coef: 0.7602 - val_loss: -0.7650 - val_dice_coef: 0.7650
Epoch 31/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7632 - dice_coef: 0.7632 - val_loss: -0.7592 - val_dice_coef: 0.7592
Epoch 32/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7739 - dice_coef: 0.7739 - val_loss: -0.7500 - val_dice_coef: 0.7500
Epoch 33/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7654 - dice_coef: 0.7654 - val_loss: -0.7508 - val_dice_coef: 0.7508
Epoch 34/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7761 - dice_coef: 0.7761 - val_loss: -0.7722 - val_dice_coef: 0.7722
Epoch 35/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7801 - dice_coef: 0.7801 - val_loss: -0.7747 - val_dice_coef: 0.7747
Epoch 36/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7890 - dice_coef: 0.7890 - val_loss: -0.7837 - val_dice_coef: 0.7837
Epoch 37/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7820 - dice_coef: 0.7820 - val_loss: -0.7793 - val_dice_coef: 0.7793
Epoch 38/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7866 - dice_coef: 0.7866 - val_loss: -0.7844 - val_dice_coef: 0.7844
Epoch 39/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7847 - dice_coef: 0.7847 - val_loss: -0.7855 - val_dice_coef: 0.7855
Epoch 40/400
1452/1452 [==============================] - 56s 39ms/step - loss: -0.7954 - dice_coef: 0.7954 - val_loss: -0.7734 - val_dice_coef: 0.7734
Epoch 41/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7980 - dice_coef: 0.7980 - val_loss: -0.7868 - val_dice_coef: 0.7868
Epoch 42/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8027 - dice_coef: 0.8027 - val_loss: -0.7732 - val_dice_coef: 0.7732
Epoch 43/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8032 - dice_coef: 0.8032 - val_loss: -0.7946 - val_dice_coef: 0.7946
Epoch 44/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7952 - dice_coef: 0.7952 - val_loss: -0.7855 - val_dice_coef: 0.7855
Epoch 45/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.7998 - dice_coef: 0.7998 - val_loss: -0.7934 - val_dice_coef: 0.7934
Epoch 46/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8016 - dice_coef: 0.8016 - val_loss: -0.7811 - val_dice_coef: 0.7811
Epoch 47/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8089 - dice_coef: 0.8089 - val_loss: -0.7845 - val_dice_coef: 0.7845
Epoch 48/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8086 - dice_coef: 0.8086 - val_loss: -0.8042 - val_dice_coef: 0.8042
Epoch 49/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8065 - dice_coef: 0.8065 - val_loss: -0.8016 - val_dice_coef: 0.8016
Epoch 50/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8089 - dice_coef: 0.8089 - val_loss: -0.8029 - val_dice_coef: 0.8029
Epoch 51/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8157 - dice_coef: 0.8157 - val_loss: -0.7901 - val_dice_coef: 0.7901
Epoch 52/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8170 - dice_coef: 0.8170 - val_loss: -0.8075 - val_dice_coef: 0.8075
Epoch 53/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8122 - dice_coef: 0.8122 - val_loss: -0.7838 - val_dice_coef: 0.7838
Epoch 54/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8054 - dice_coef: 0.8054 - val_loss: -0.7724 - val_dice_coef: 0.7724
Epoch 55/400
1452/1452 [==============================] - 56s 38ms/step - loss: -0.8117 - dice_coef: 0.8117 - val_loss: -0.8031 - val_dice_coef: 0.8031
Epoch 56/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8128 - dice_coef: 0.8128 - val_loss: -0.8036 - val_dice_coef: 0.8036
Epoch 57/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8216 - dice_coef: 0.8216 - val_loss: -0.8097 - val_dice_coef: 0.8097
Epoch 58/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8212 - dice_coef: 0.8212 - val_loss: -0.7545 - val_dice_coef: 0.7545
Epoch 59/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8157 - dice_coef: 0.8157 - val_loss: -0.8071 - val_dice_coef: 0.8071
Epoch 60/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8075 - dice_coef: 0.8075 - val_loss: -0.8124 - val_dice_coef: 0.8124
Epoch 61/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8223 - dice_coef: 0.8223 - val_loss: -0.8152 - val_dice_coef: 0.8152
Epoch 62/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8268 - dice_coef: 0.8268 - val_loss: -0.8138 - val_dice_coef: 0.8138
Epoch 63/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8298 - dice_coef: 0.8298 - val_loss: -0.8128 - val_dice_coef: 0.8128
Epoch 64/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8299 - dice_coef: 0.8299 - val_loss: -0.8083 - val_dice_coef: 0.8083
Epoch 65/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8263 - dice_coef: 0.8263 - val_loss: -0.8029 - val_dice_coef: 0.8029
Epoch 66/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8140 - dice_coef: 0.8140 - val_loss: -0.8175 - val_dice_coef: 0.8175
Epoch 67/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8257 - dice_coef: 0.8257 - val_loss: -0.8159 - val_dice_coef: 0.8159
Epoch 68/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8318 - dice_coef: 0.8318 - val_loss: -0.8215 - val_dice_coef: 0.8215
Epoch 69/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8205 - dice_coef: 0.8205 - val_loss: -0.8117 - val_dice_coef: 0.8117
Epoch 70/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8278 - dice_coef: 0.8278 - val_loss: -0.7985 - val_dice_coef: 0.7985
Epoch 71/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8306 - dice_coef: 0.8306 - val_loss: -0.8135 - val_dice_coef: 0.8135
Epoch 72/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8027 - dice_coef: 0.8027 - val_loss: -0.8039 - val_dice_coef: 0.8039
Epoch 73/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8246 - dice_coef: 0.8246 - val_loss: -0.8020 - val_dice_coef: 0.8020
Epoch 74/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8340 - dice_coef: 0.8340 - val_loss: -0.8227 - val_dice_coef: 0.8227
Epoch 75/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8362 - dice_coef: 0.8362 - val_loss: -0.8161 - val_dice_coef: 0.8161
Epoch 76/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8344 - dice_coef: 0.8344 - val_loss: -0.8216 - val_dice_coef: 0.8216
Epoch 77/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8391 - dice_coef: 0.8391 - val_loss: -0.8009 - val_dice_coef: 0.8009
Epoch 78/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8372 - dice_coef: 0.8372 - val_loss: -0.8097 - val_dice_coef: 0.8097
Epoch 79/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8352 - dice_coef: 0.8352 - val_loss: -0.7999 - val_dice_coef: 0.7999
Epoch 80/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8389 - dice_coef: 0.8389 - val_loss: -0.8096 - val_dice_coef: 0.8096
Epoch 81/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8425 - dice_coef: 0.8425 - val_loss: -0.8276 - val_dice_coef: 0.8276
Epoch 82/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8373 - dice_coef: 0.8373 - val_loss: -0.8289 - val_dice_coef: 0.8289
Epoch 83/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8354 - dice_coef: 0.8354 - val_loss: -0.8263 - val_dice_coef: 0.8263
Epoch 84/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8413 - dice_coef: 0.8413 - val_loss: -0.8244 - val_dice_coef: 0.8244
Epoch 85/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8409 - dice_coef: 0.8409 - val_loss: -0.8286 - val_dice_coef: 0.8286
Epoch 86/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8439 - dice_coef: 0.8439 - val_loss: -0.8286 - val_dice_coef: 0.8286
Epoch 87/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8441 - dice_coef: 0.8441 - val_loss: -0.8326 - val_dice_coef: 0.8326
Epoch 88/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8465 - dice_coef: 0.8465 - val_loss: -0.8149 - val_dice_coef: 0.8149
Epoch 89/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8464 - dice_coef: 0.8464 - val_loss: -0.8233 - val_dice_coef: 0.8233
Epoch 90/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8475 - dice_coef: 0.8475 - val_loss: -0.8156 - val_dice_coef: 0.8156
Epoch 91/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8459 - dice_coef: 0.8459 - val_loss: -0.8283 - val_dice_coef: 0.8283
Epoch 92/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8477 - dice_coef: 0.8477 - val_loss: -0.8327 - val_dice_coef: 0.8327
Epoch 93/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8473 - dice_coef: 0.8473 - val_loss: -0.8333 - val_dice_coef: 0.8333
Epoch 94/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8429 - dice_coef: 0.8429 - val_loss: -0.8328 - val_dice_coef: 0.8328
Epoch 95/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8411 - dice_coef: 0.8411 - val_loss: -0.8281 - val_dice_coef: 0.8281
Epoch 96/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8408 - dice_coef: 0.8408 - val_loss: -0.8118 - val_dice_coef: 0.8118
Epoch 97/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8438 - dice_coef: 0.8438 - val_loss: -0.8320 - val_dice_coef: 0.8320
Epoch 98/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8517 - dice_coef: 0.8517 - val_loss: -0.8344 - val_dice_coef: 0.8344
Epoch 99/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8492 - dice_coef: 0.8492 - val_loss: -0.8321 - val_dice_coef: 0.8321
Epoch 100/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8520 - dice_coef: 0.8520 - val_loss: -0.8274 - val_dice_coef: 0.8274
Epoch 101/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8487 - dice_coef: 0.8487 - val_loss: -0.8087 - val_dice_coef: 0.8087
Epoch 102/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8476 - dice_coef: 0.8476 - val_loss: -0.8389 - val_dice_coef: 0.8389
Epoch 103/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8518 - dice_coef: 0.8518 - val_loss: -0.8387 - val_dice_coef: 0.8387
Epoch 104/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8535 - dice_coef: 0.8535 - val_loss: -0.8364 - val_dice_coef: 0.8364
Epoch 105/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8514 - dice_coef: 0.8514 - val_loss: -0.8362 - val_dice_coef: 0.8362
Epoch 106/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8502 - dice_coef: 0.8502 - val_loss: -0.8295 - val_dice_coef: 0.8295
Epoch 107/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8542 - dice_coef: 0.8542 - val_loss: -0.8389 - val_dice_coef: 0.8389
Epoch 108/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8575 - dice_coef: 0.8575 - val_loss: -0.8377 - val_dice_coef: 0.8377
Epoch 109/400
1452/1452 [==============================] - 56s 38ms/step - loss: -0.8578 - dice_coef: 0.8578 - val_loss: -0.8407 - val_dice_coef: 0.8407
Epoch 110/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8559 - dice_coef: 0.8559 - val_loss: -0.8373 - val_dice_coef: 0.8373
Epoch 111/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8549 - dice_coef: 0.8549 - val_loss: -0.8398 - val_dice_coef: 0.8398
Epoch 112/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8542 - dice_coef: 0.8542 - val_loss: -0.8415 - val_dice_coef: 0.8415
Epoch 113/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8522 - dice_coef: 0.8522 - val_loss: -0.8419 - val_dice_coef: 0.8419
Epoch 114/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8579 - dice_coef: 0.8579 - val_loss: -0.8423 - val_dice_coef: 0.8423
Epoch 115/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8543 - dice_coef: 0.8543 - val_loss: -0.8282 - val_dice_coef: 0.8282
Epoch 116/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8589 - dice_coef: 0.8589 - val_loss: -0.8390 - val_dice_coef: 0.8390
Epoch 117/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8515 - dice_coef: 0.8515 - val_loss: -0.8268 - val_dice_coef: 0.8268
Epoch 118/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8592 - dice_coef: 0.8592 - val_loss: -0.8434 - val_dice_coef: 0.8434
Epoch 119/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8565 - dice_coef: 0.8565 - val_loss: -0.8385 - val_dice_coef: 0.8385
Epoch 120/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8600 - dice_coef: 0.8600 - val_loss: -0.8450 - val_dice_coef: 0.8450
Epoch 121/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8617 - dice_coef: 0.8617 - val_loss: -0.8412 - val_dice_coef: 0.8412
Epoch 122/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8564 - dice_coef: 0.8564 - val_loss: -0.8415 - val_dice_coef: 0.8415
Epoch 123/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8386 - dice_coef: 0.8386 - val_loss: -0.8268 - val_dice_coef: 0.8268
Epoch 124/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8527 - dice_coef: 0.8527 - val_loss: -0.8226 - val_dice_coef: 0.8226
Epoch 125/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8551 - dice_coef: 0.8551 - val_loss: -0.8385 - val_dice_coef: 0.8385
Epoch 126/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8594 - dice_coef: 0.8594 - val_loss: -0.8392 - val_dice_coef: 0.8392
Epoch 127/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8607 - dice_coef: 0.8607 - val_loss: -0.8409 - val_dice_coef: 0.8409
Epoch 128/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8621 - dice_coef: 0.8621 - val_loss: -0.8430 - val_dice_coef: 0.8430
Epoch 129/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8645 - dice_coef: 0.8645 - val_loss: -0.8473 - val_dice_coef: 0.8473
Epoch 130/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8629 - dice_coef: 0.8629 - val_loss: -0.8430 - val_dice_coef: 0.8430
Epoch 131/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8407 - dice_coef: 0.8407 - val_loss: -0.7742 - val_dice_coef: 0.7742
Epoch 132/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8437 - dice_coef: 0.8437 - val_loss: -0.8417 - val_dice_coef: 0.8417
Epoch 133/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8568 - dice_coef: 0.8568 - val_loss: -0.8446 - val_dice_coef: 0.8446
Epoch 134/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8569 - dice_coef: 0.8569 - val_loss: -0.8372 - val_dice_coef: 0.8372
Epoch 135/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8580 - dice_coef: 0.8580 - val_loss: -0.8400 - val_dice_coef: 0.8400
Epoch 136/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8574 - dice_coef: 0.8574 - val_loss: -0.8345 - val_dice_coef: 0.8345
Epoch 137/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8596 - dice_coef: 0.8596 - val_loss: -0.8487 - val_dice_coef: 0.8487
Epoch 138/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8626 - dice_coef: 0.8626 - val_loss: -0.8469 - val_dice_coef: 0.8469
Epoch 139/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8655 - dice_coef: 0.8655 - val_loss: -0.8491 - val_dice_coef: 0.8491
Epoch 140/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8672 - dice_coef: 0.8672 - val_loss: -0.8393 - val_dice_coef: 0.8393
Epoch 141/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8659 - dice_coef: 0.8659 - val_loss: -0.8453 - val_dice_coef: 0.8453
Epoch 142/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8665 - dice_coef: 0.8665 - val_loss: -0.8508 - val_dice_coef: 0.8508
Epoch 143/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8532 - dice_coef: 0.8532 - val_loss: -0.8314 - val_dice_coef: 0.8314
Epoch 144/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8643 - dice_coef: 0.8643 - val_loss: -0.8456 - val_dice_coef: 0.8456
Epoch 145/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8673 - dice_coef: 0.8673 - val_loss: -0.8479 - val_dice_coef: 0.8479
Epoch 146/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8682 - dice_coef: 0.8682 - val_loss: -0.8497 - val_dice_coef: 0.8497
Epoch 147/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8622 - dice_coef: 0.8622 - val_loss: -0.8451 - val_dice_coef: 0.8451
Epoch 148/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8666 - dice_coef: 0.8666 - val_loss: -0.8429 - val_dice_coef: 0.8429
Epoch 149/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8599 - dice_coef: 0.8599 - val_loss: -0.8507 - val_dice_coef: 0.8507
Epoch 150/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8700 - dice_coef: 0.8700 - val_loss: -0.8500 - val_dice_coef: 0.8500
Epoch 151/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8697 - dice_coef: 0.8697 - val_loss: -0.8483 - val_dice_coef: 0.8483
Epoch 152/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8703 - dice_coef: 0.8703 - val_loss: -0.8445 - val_dice_coef: 0.8445
Epoch 153/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8717 - dice_coef: 0.8717 - val_loss: -0.8520 - val_dice_coef: 0.8520
Epoch 154/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8719 - dice_coef: 0.8719 - val_loss: -0.8509 - val_dice_coef: 0.8509
Epoch 155/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8717 - dice_coef: 0.8717 - val_loss: -0.8506 - val_dice_coef: 0.8506
Epoch 156/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8643 - dice_coef: 0.8643 - val_loss: -0.8464 - val_dice_coef: 0.8464
Epoch 157/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8695 - dice_coef: 0.8695 - val_loss: -0.8483 - val_dice_coef: 0.8483
Epoch 158/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8645 - dice_coef: 0.8645 - val_loss: -0.8396 - val_dice_coef: 0.8396
Epoch 159/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8699 - dice_coef: 0.8699 - val_loss: -0.8509 - val_dice_coef: 0.8509
Epoch 160/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8717 - dice_coef: 0.8717 - val_loss: -0.8456 - val_dice_coef: 0.8456
Epoch 161/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8704 - dice_coef: 0.8704 - val_loss: -0.8400 - val_dice_coef: 0.8400
Epoch 162/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8693 - dice_coef: 0.8693 - val_loss: -0.8324 - val_dice_coef: 0.8324
Epoch 163/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8675 - dice_coef: 0.8675 - val_loss: -0.8256 - val_dice_coef: 0.8256
Epoch 164/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8640 - dice_coef: 0.8640 - val_loss: -0.8532 - val_dice_coef: 0.8532
Epoch 165/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8711 - dice_coef: 0.8711 - val_loss: -0.8507 - val_dice_coef: 0.8507
Epoch 166/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8684 - dice_coef: 0.8684 - val_loss: -0.8538 - val_dice_coef: 0.8538
Epoch 167/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8698 - dice_coef: 0.8698 - val_loss: -0.8540 - val_dice_coef: 0.8540
Epoch 168/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8748 - dice_coef: 0.8748 - val_loss: -0.8528 - val_dice_coef: 0.8528
Epoch 169/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8758 - dice_coef: 0.8758 - val_loss: -0.8542 - val_dice_coef: 0.8542
Epoch 170/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8722 - dice_coef: 0.8722 - val_loss: -0.8536 - val_dice_coef: 0.8536
Epoch 171/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8685 - dice_coef: 0.8685 - val_loss: -0.8475 - val_dice_coef: 0.8475
Epoch 172/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8566 - dice_coef: 0.8566 - val_loss: -0.8511 - val_dice_coef: 0.8511
Epoch 173/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8703 - dice_coef: 0.8703 - val_loss: -0.8525 - val_dice_coef: 0.8525
Epoch 174/400
1452/1452 [==============================] - 56s 38ms/step - loss: -0.8729 - dice_coef: 0.8729 - val_loss: -0.8548 - val_dice_coef: 0.8548
Epoch 175/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8675 - dice_coef: 0.8675 - val_loss: -0.8467 - val_dice_coef: 0.8467
Epoch 176/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8603 - dice_coef: 0.8603 - val_loss: -0.8506 - val_dice_coef: 0.8506
Epoch 177/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8733 - dice_coef: 0.8733 - val_loss: -0.8488 - val_dice_coef: 0.8488
Epoch 178/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8725 - dice_coef: 0.8725 - val_loss: -0.8403 - val_dice_coef: 0.8403
Epoch 179/400
1452/1452 [==============================] - 56s 38ms/step - loss: -0.8772 - dice_coef: 0.8772 - val_loss: -0.8554 - val_dice_coef: 0.8554
Epoch 180/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8786 - dice_coef: 0.8786 - val_loss: -0.8370 - val_dice_coef: 0.8370
Epoch 181/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8747 - dice_coef: 0.8747 - val_loss: -0.8562 - val_dice_coef: 0.8562
Epoch 182/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8787 - dice_coef: 0.8787 - val_loss: -0.8512 - val_dice_coef: 0.8512
Epoch 183/400
1452/1452 [==============================] - 56s 38ms/step - loss: -0.8782 - dice_coef: 0.8782 - val_loss: -0.8490 - val_dice_coef: 0.8490
Epoch 184/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8789 - dice_coef: 0.8789 - val_loss: -0.8564 - val_dice_coef: 0.8564
Epoch 185/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8785 - dice_coef: 0.8785 - val_loss: -0.8569 - val_dice_coef: 0.8569
Epoch 186/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8804 - dice_coef: 0.8804 - val_loss: -0.8433 - val_dice_coef: 0.8433
Epoch 187/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8753 - dice_coef: 0.8753 - val_loss: -0.8499 - val_dice_coef: 0.8499
Epoch 188/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8779 - dice_coef: 0.8779 - val_loss: -0.8464 - val_dice_coef: 0.8464
Epoch 189/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8762 - dice_coef: 0.8762 - val_loss: -0.8580 - val_dice_coef: 0.8580
Epoch 190/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8794 - dice_coef: 0.8794 - val_loss: -0.8513 - val_dice_coef: 0.8513
Epoch 191/400
1452/1452 [==============================] - 56s 38ms/step - loss: -0.8795 - dice_coef: 0.8795 - val_loss: -0.8560 - val_dice_coef: 0.8560
Epoch 192/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8794 - dice_coef: 0.8794 - val_loss: -0.8518 - val_dice_coef: 0.8518
Epoch 193/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8763 - dice_coef: 0.8763 - val_loss: -0.8489 - val_dice_coef: 0.8489
Epoch 194/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8800 - dice_coef: 0.8800 - val_loss: -0.8597 - val_dice_coef: 0.8597
Epoch 195/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8757 - dice_coef: 0.8757 - val_loss: -0.8592 - val_dice_coef: 0.8592
Epoch 196/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8749 - dice_coef: 0.8749 - val_loss: -0.8533 - val_dice_coef: 0.8533
Epoch 197/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8785 - dice_coef: 0.8785 - val_loss: -0.8592 - val_dice_coef: 0.8592
Epoch 198/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8827 - dice_coef: 0.8827 - val_loss: -0.8595 - val_dice_coef: 0.8595
Epoch 199/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8784 - dice_coef: 0.8784 - val_loss: -0.8546 - val_dice_coef: 0.8546
Epoch 200/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8746 - dice_coef: 0.8746 - val_loss: -0.8526 - val_dice_coef: 0.8526
Epoch 201/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8795 - dice_coef: 0.8795 - val_loss: -0.8597 - val_dice_coef: 0.8597
Epoch 202/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8824 - dice_coef: 0.8824 - val_loss: -0.8517 - val_dice_coef: 0.8517
Epoch 203/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8840 - dice_coef: 0.8840 - val_loss: -0.8605 - val_dice_coef: 0.8605
Epoch 204/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8834 - dice_coef: 0.8834 - val_loss: -0.8462 - val_dice_coef: 0.8462
Epoch 205/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8810 - dice_coef: 0.8810 - val_loss: -0.8601 - val_dice_coef: 0.8601
Epoch 206/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8748 - dice_coef: 0.8748 - val_loss: -0.8366 - val_dice_coef: 0.8366
Epoch 207/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8689 - dice_coef: 0.8689 - val_loss: -0.8525 - val_dice_coef: 0.8525
Epoch 208/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8833 - dice_coef: 0.8833 - val_loss: -0.8611 - val_dice_coef: 0.8611
Epoch 209/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8840 - dice_coef: 0.8840 - val_loss: -0.8620 - val_dice_coef: 0.8620
Epoch 210/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8837 - dice_coef: 0.8837 - val_loss: -0.8607 - val_dice_coef: 0.8607
Epoch 211/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8809 - dice_coef: 0.8809 - val_loss: -0.8588 - val_dice_coef: 0.8588
Epoch 212/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8821 - dice_coef: 0.8821 - val_loss: -0.8427 - val_dice_coef: 0.8427
Epoch 213/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8813 - dice_coef: 0.8813 - val_loss: -0.8613 - val_dice_coef: 0.8613
Epoch 214/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8859 - dice_coef: 0.8859 - val_loss: -0.8497 - val_dice_coef: 0.8497
Epoch 215/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8818 - dice_coef: 0.8818 - val_loss: -0.8601 - val_dice_coef: 0.8601
Epoch 216/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8830 - dice_coef: 0.8830 - val_loss: -0.8516 - val_dice_coef: 0.8516
Epoch 217/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8850 - dice_coef: 0.8850 - val_loss: -0.8566 - val_dice_coef: 0.8566
Epoch 218/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8783 - dice_coef: 0.8783 - val_loss: -0.8586 - val_dice_coef: 0.8586
Epoch 219/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8857 - dice_coef: 0.8857 - val_loss: -0.8617 - val_dice_coef: 0.8617
Epoch 220/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8858 - dice_coef: 0.8858 - val_loss: -0.8600 - val_dice_coef: 0.8600
Epoch 221/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8860 - dice_coef: 0.8860 - val_loss: -0.8601 - val_dice_coef: 0.8601
Epoch 222/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8871 - dice_coef: 0.8871 - val_loss: -0.8524 - val_dice_coef: 0.8524
Epoch 223/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8831 - dice_coef: 0.8831 - val_loss: -0.8363 - val_dice_coef: 0.8363
Epoch 224/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8855 - dice_coef: 0.8855 - val_loss: -0.8616 - val_dice_coef: 0.8616
Epoch 225/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8883 - dice_coef: 0.8883 - val_loss: -0.8623 - val_dice_coef: 0.8623
Epoch 226/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8894 - dice_coef: 0.8894 - val_loss: -0.8606 - val_dice_coef: 0.8606
Epoch 227/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8867 - dice_coef: 0.8867 - val_loss: -0.8602 - val_dice_coef: 0.8602
Epoch 228/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8896 - dice_coef: 0.8896 - val_loss: -0.8611 - val_dice_coef: 0.8611
Epoch 229/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8883 - dice_coef: 0.8883 - val_loss: -0.8559 - val_dice_coef: 0.8559
Epoch 230/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8910 - dice_coef: 0.8910 - val_loss: -0.8509 - val_dice_coef: 0.8509
Epoch 231/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8906 - dice_coef: 0.8906 - val_loss: -0.8622 - val_dice_coef: 0.8622
Epoch 232/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8891 - dice_coef: 0.8891 - val_loss: -0.8530 - val_dice_coef: 0.8530
Epoch 233/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8908 - dice_coef: 0.8908 - val_loss: -0.8570 - val_dice_coef: 0.8570
Epoch 234/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8901 - dice_coef: 0.8901 - val_loss: -0.8626 - val_dice_coef: 0.8626
Epoch 235/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8918 - dice_coef: 0.8918 - val_loss: -0.8539 - val_dice_coef: 0.8539
Epoch 236/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8874 - dice_coef: 0.8874 - val_loss: -0.8627 - val_dice_coef: 0.8627
Epoch 237/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8928 - dice_coef: 0.8928 - val_loss: -0.8563 - val_dice_coef: 0.8563
Epoch 238/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8924 - dice_coef: 0.8924 - val_loss: -0.8632 - val_dice_coef: 0.8632
Epoch 239/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8943 - dice_coef: 0.8943 - val_loss: -0.8589 - val_dice_coef: 0.8589
Epoch 240/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8936 - dice_coef: 0.8936 - val_loss: -0.8635 - val_dice_coef: 0.8635
Epoch 241/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8885 - dice_coef: 0.8885 - val_loss: -0.8627 - val_dice_coef: 0.8627
Epoch 242/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8845 - dice_coef: 0.8845 - val_loss: -0.8613 - val_dice_coef: 0.8613
Epoch 243/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8914 - dice_coef: 0.8914 - val_loss: -0.8501 - val_dice_coef: 0.8501
Epoch 244/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8933 - dice_coef: 0.8933 - val_loss: -0.8592 - val_dice_coef: 0.8592
Epoch 245/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8941 - dice_coef: 0.8941 - val_loss: -0.8652 - val_dice_coef: 0.8652
Epoch 246/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8940 - dice_coef: 0.8940 - val_loss: -0.8609 - val_dice_coef: 0.8609
Epoch 247/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8894 - dice_coef: 0.8894 - val_loss: -0.8546 - val_dice_coef: 0.8546
Epoch 248/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8945 - dice_coef: 0.8945 - val_loss: -0.8621 - val_dice_coef: 0.8621
Epoch 249/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8896 - dice_coef: 0.8896 - val_loss: -0.8397 - val_dice_coef: 0.8397
Epoch 250/400
1452/1452 [==============================] - 55s 38ms/step - loss: -0.8896 - dice_coef: 0.8896 - val_loss: -0.8481 - val_dice_coef: 0.8481
Epoch 251/400
 128/1452 [=>............................] - ETA: 43s - loss: -0.8998 - dice_coef: 0.8998
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-9-5128ef89a660> in <module>()
      2 print('start training...')
      3 history = model_fcn01.fit(X_train, Y_train, batch_size=64, epochs=400, verbose=1,
----> 4               shuffle=True, validation_data=(X_valid, Y_valid), callbacks=[checkpointer])

~/anaconda3/envs/py3/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
   1655                               initial_epoch=initial_epoch,
   1656                               steps_per_epoch=steps_per_epoch,
-> 1657                               validation_steps=validation_steps)
   1658 
   1659     def evaluate(self, x=None, y=None,

~/anaconda3/envs/py3/lib/python3.6/site-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
   1211                     batch_logs['size'] = len(batch_ids)
   1212                     callbacks.on_batch_begin(batch_index, batch_logs)
-> 1213                     outs = f(ins_batch)
   1214                     if not isinstance(outs, list):
   1215                         outs = [outs]

~/anaconda3/envs/py3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2355         session = get_session()
   2356         updated = session.run(fetches=fetches, feed_dict=feed_dict,
-> 2357                               **self.session_kwargs)
   2358         return updated[:len(self.outputs)]
   2359 

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    887     try:
    888       result = self._run(None, fetches, feed_dict, options_ptr,
--> 889                          run_metadata_ptr)
    890       if run_metadata:
    891         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1118     if final_fetches or final_targets or (handle and feed_dict_tensor):
   1119       results = self._do_run(handle, final_targets, final_fetches,
-> 1120                              feed_dict_tensor, options, run_metadata)
   1121     else:
   1122       results = []

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1315     if handle is None:
   1316       return self._do_call(_run_fn, self._session, feeds, fetches, targets,
-> 1317                            options, run_metadata)
   1318     else:
   1319       return self._do_call(_prun_fn, self._session, handle, feeds, fetches)

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1321   def _do_call(self, fn, *args):
   1322     try:
-> 1323       return fn(*args)
   1324     except errors.OpError as e:
   1325       message = compat.as_text(e.message)

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1300           return tf_session.TF_Run(session, options,
   1301                                    feed_dict, fetch_list, target_list,
-> 1302                                    status, run_metadata)
   1303 
   1304     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 
In [10]:
    # Save the Keras training history dict to disk so it can be re-plotted later.
    # BUG FIX: the original ended with `f.close` (an attribute access, not a
    # call), so the file handle was never explicitly closed. A context manager
    # guarantees the handle is closed even if pickle.dump raises.
    # NOTE(review): `history` only exists if the fit() cell above completed;
    # it was interrupted here, hence the NameError in the recorded output.
    with open(dname_checkpoints + '/' + fname_history, 'wb') as f:
        pickle.dump(history.history, f)
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-10-dadc4c67b0f0> in <module>()
      1 # Save History
      2 f = open(dname_checkpoints + '/' + fname_history,'wb')
----> 3 pickle.dump(history.history,f)
      4 f.close

NameError: name 'history' is not defined
In [11]:
#
#  TEST MODE
#
mode = 'TEST'
if mode == "TEST":
    # Prediction (test) mode

    # Load the trained weights from the chosen checkpoint epoch.
    # NOTE(review): `dname_checkpoints` and `model_fcn01` are defined in
    # earlier cells; this cell assumes they are already in the kernel state.
    epoch = 200
    fname_weights = 'model_weights_%02d.h5'%(epoch)
    fpath_weights = os.path.join(dname_checkpoints, fname_weights)
    model_fcn01.load_weights(fpath_weights)
    print('==> done')
==> done
In [14]:
    # Read Test Data: build the lists of test image/mask paths and load the
    # test images into a single array.
    fnames = load_fnames('data/list_test_01.txt')

    [fpaths_xs_test,fpaths_ys_test] = make_fnames(fnames,'data.LAB/img','data.LAB/mask','OperatorA_')

    X_test = load_imgs_asarray(fpaths_xs_test, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
  

    # Load the per-channel mean and standard deviation computed at training time.
    print('loading mean and standard deviation from ' + fname_stats + '...')
    stats = np.load(dname_checkpoints + '/' + fname_stats)
    mean = stats['mean']
    std = stats['std']
    print('==> mean: ' + str(mean))
    print('==> std : ' + str(std))

    # Standardize each of the 3 color channels with the training statistics.
    # NOTE(review): `X_test[:, i]` treats axis 1 as the channel axis, i.e. a
    # channels-first layout — confirm this matches `dim_ordering` above.
    for i in range(3):
        X_test[:, i] = (X_test[:, i] - mean[i]) / std[i]
    print('==> done')
loading mean and standard deviation from stats01.npz...
==> mean: [143.01152 142.41399 107.15788]
==> std : [ 9.874445  8.032658 48.13504 ]
==> done
In [17]:
    from PIL import Image
    import matplotlib.pyplot as plt

    # Load the trained weights from the chosen checkpoint epoch.
    epoch = 100
    fname_weights = 'model_weights_%02d.h5'%(epoch)
    fpath_weights = os.path.join(dname_checkpoints, fname_weights)
    model_fcn01.load_weights(fpath_weights)
    print('==> done')

    # Run prediction (inference) on the test set.
    outputs = model_fcn01.predict(X_test)

    # Save each predicted mask to disk as a PNG image.
    dname_outputs = './outputs/'
    if not os.path.isdir(dname_outputs):
        print('create directory: %s'%(dname_outputs))
        os.mkdir(dname_outputs)

    print('saving outputs as images...')
    n = 0
    for i, array in enumerate(outputs):
        array = np.where(array > 0.5, 1, 0)  # threshold to a binary mask
        array = array.astype(np.float32)
        img_out = array_to_img(array, dim_ordering)
        fpath_out = os.path.join(dname_outputs, "%05d.png"%(n))
        img_out.save(fpath_out)
        n = n + 1

    print('==> done')

    # Evaluate: per-image Dice coefficient and mask-center displacement.
    n = 0
    diff = []
    dice_eval = []
    center_test = []
    center_gt = []

    for i in range(len(fpaths_xs_test)):
        # Test (input) image
        im1 = Image.open(fpaths_xs_test[i])
        im1 = im1.resize((320,240))
        # Predicted mask (read back from the PNG saved above); the center is
        # taken at the original saved size, before resizing for display.
        im2 = Image.open(os.path.join(dname_outputs, "%05d.png"%(n)))
        center_test.append(get_center(np.array(im2)))
        im2 = im2.resize((320,240))
        # Ground truth mask; its center is measured at `target_size` so both
        # centers live in the same coordinate frame.
        im3 = Image.open(fpaths_ys_test[i])
        im3t = im3.resize(target_size)
        center_gt.append(get_center(np.array(im3t)))
        im3 = im3.resize((320,240))

        # Composite for visual inspection: red = prediction, green = ground truth.
        im2_d = np.zeros((240,320,3), 'uint8')
        im2_d[:,:,0] = np.array(im2)
        im2_d[:,:,1] = np.array(im3)*255
        im2_d[:,:,2] = 0

        # Binarize both masks, then compute the Dice coefficient.
        im2a = np.array(im2)
        im2a[im2a > 0] = 1
        im3a = np.array(im3)
        im3a[im3a > 0] = 1

        overlap_a = im2a * im3a  # intersection (pixels set in both masks)
        overlap_b = im2a + im3a  # per-pixel sum of the two masks (0, 1 or 2)
        # FIX: np.sum accumulates in a wide integer dtype. The original
        # sum(sum(...)) summed uint8 rows elementwise, so column totals of
        # overlap_b (up to 480) could silently wrap at 256.
        # NOTE(review): raises ZeroDivisionError if both masks are empty.
        dice_eval.append(2 * np.sum(overlap_a) / np.sum(overlap_b))

        if (i%100) == 0:
            # FIX: corrected typo 'Prcessing' -> 'Processing' in the log message.
            print('Processing image %d / %d'%(i,len(fpaths_xs_test)))

        n = n + 1

    # Mean Dice and mean Euclidean distance between predicted and GT centers.
    diff = np.array(center_test) - np.array(center_gt)
    print('%d: Dice eval av. : %f'%(epoch,np.mean(np.array(dice_eval))))
    print('%d: Center Diff av. : %f'%(epoch,np.sum(np.linalg.norm(diff,axis=1))/diff.shape[0]))
==> done
saving outputs as images...
==> done
Prcessing image 0 / 270
Prcessing image 100 / 270
Prcessing image 200 / 270
100: Dice eval av. : 0.781163
100: Center Diff av. : 2.696763
In [ ]:
    # Run prediction (inference) on the test set.
    outputs = model_fcn01.predict(X_test)
    
In [17]:
    # Save each predicted mask to disk as a PNG image.
    dname_outputs = './outputs/'
    if not os.path.isdir(dname_outputs):
        print('create directory: %s'%(dname_outputs))
        os.mkdir(dname_outputs)

    print('saving outputs as images...')
    n = 0
    for prediction in outputs:
        # Threshold at 0.5 to obtain a binary mask, then cast for saving.
        binary_mask = np.where(prediction > 0.5, 1, 0).astype(np.float32)
        img_out = array_to_img(binary_mask, dim_ordering)
        img_out.save(os.path.join(dname_outputs, "%05d.png"%(n)))
        n = n + 1

    print('==> done')
saving outputs as images...
==> done
In [21]:
    from PIL import Image
    import matplotlib.pyplot as plt

    # Per-image Dice evaluation with side-by-side visualization of the input
    # image and a prediction/ground-truth overlay.
    n = 0
    dice_eval = []
    
    for i in range(len(fpaths_xs_test)):
        # Test (input) image
        im1 = Image.open(fpaths_xs_test[i])
        im1 = im1.resize((320,240)) 
        # Predicted mask saved by the previous cell
        im2 = Image.open(os.path.join(dname_outputs, "%05d.png"%(n)))
        im2 = im2.resize((320,240))
        # Ground truth mask
        im3 = Image.open(fpaths_ys_test[i])
        im3 = im3.resize((320,240))

        # Composite for display: red = prediction, green = ground truth
        # (overlap appears yellow).
        im2_d = np.zeros((240,320,3), 'uint8')
        im2_d[:,:,0] = np.array(im2)
        im2_d[:,:,1] = np.array(im3)*255
        im2_d[:,:,2] = 0

        # Binarize both masks before computing the Dice coefficient.
        im2a = np.array(im2)
        im2a[im2a > 0] = 1
        im3a = np.array(im3)
        im3a[im3a > 0] = 1

        overlap_a = im2a * im3a  # intersection (pixels set in both masks)
        overlap_b = im2a + im3a  # per-pixel sum of the two masks (0, 1 or 2)
        # FIX: np.sum accumulates in a wide integer dtype. The original
        # sum(sum(...)) summed uint8 rows elementwise, so column totals of
        # overlap_b (up to 480) could silently wrap at 256. Also compute the
        # Dice value once instead of twice.
        # NOTE(review): raises ZeroDivisionError if both masks are empty.
        dice = 2 * np.sum(overlap_a) / np.sum(overlap_b)
        print('%03d: Dice Coeff = %f'%(i, dice))
        print('%f'%img_dice_coeff(im2,im3))
        dice_eval.append(dice)

        plt.imshow(np.hstack((np.array(im1),np.array(im2_d))))
        plt.show()

        n = n + 1
    
    print('Dice eval av. : %f'%np.mean(np.array(dice_eval)))
000: Dice Coeff = 0.899851
0.899851
001: Dice Coeff = 0.738682
0.738682
002: Dice Coeff = 0.869748
0.869748
003: Dice Coeff = 0.820842
0.820842
004: Dice Coeff = 0.729443
0.729443
005: Dice Coeff = 0.766404
0.766404
006: Dice Coeff = 0.766885
0.766885
007: Dice Coeff = 0.752830
0.752830
008: Dice Coeff = 0.870311
0.870311
009: Dice Coeff = 0.859688
0.859688
010: Dice Coeff = 0.926719
0.926719
011: Dice Coeff = 0.555901
0.555901
012: Dice Coeff = 0.818182
0.818182
013: Dice Coeff = 0.960317
0.960317
014: Dice Coeff = 0.635920
0.635920
015: Dice Coeff = 0.865707
0.865707
016: Dice Coeff = 0.874627
0.874627
017: Dice Coeff = 0.921438
0.921438
018: Dice Coeff = 0.899204
0.899204
019: Dice Coeff = 0.903614
0.903614
020: Dice Coeff = 0.876738
0.876738
021: Dice Coeff = 0.789474
0.789474
022: Dice Coeff = 0.882250
0.882250
023: Dice Coeff = 0.968654
0.968654
024: Dice Coeff = 0.958386
0.958386
025: Dice Coeff = 0.909297
0.909297
026: Dice Coeff = 0.944810
0.944810
027: Dice Coeff = 0.905051
0.905051
028: Dice Coeff = 0.932961
0.932961
029: Dice Coeff = 0.844391
0.844391
030: Dice Coeff = 0.933333
0.933333
031: Dice Coeff = 0.956618
0.956618
032: Dice Coeff = 0.894091
0.894091
033: Dice Coeff = 0.804598
0.804598
034: Dice Coeff = 0.942209
0.942209
035: Dice Coeff = 0.890715
0.890715
036: Dice Coeff = 0.874743
0.874743
037: Dice Coeff = 0.845369
0.845369
038: Dice Coeff = 0.751445
0.751445
039: Dice Coeff = 0.852564
0.852564
040: Dice Coeff = 0.873700
0.873700
041: Dice Coeff = 0.942714
0.942714
042: Dice Coeff = 0.926499
0.926499
043: Dice Coeff = 0.841584
0.841584
044: Dice Coeff = 0.887439
0.887439
045: Dice Coeff = 0.861518
0.861518
046: Dice Coeff = 0.924242
0.924242
047: Dice Coeff = 0.958018
0.958018
048: Dice Coeff = 0.893004
0.893004
049: Dice Coeff = 0.790607
0.790607
050: Dice Coeff = 0.888889
0.888889
051: Dice Coeff = 0.855377
0.855377
052: Dice Coeff = 0.939457
0.939457
053: Dice Coeff = 0.934579
0.934579
054: Dice Coeff = 0.925307
0.925307
055: Dice Coeff = 0.756303
0.756303
056: Dice Coeff = 0.915646
0.915646
057: Dice Coeff = 0.866477
0.866477
058: Dice Coeff = 0.923864
0.923864
059: Dice Coeff = 0.937557
0.937557
060: Dice Coeff = 0.869707
0.869707
061: Dice Coeff = 0.891599
0.891599
062: Dice Coeff = 0.882658
0.882658
063: Dice Coeff = 0.852941
0.852941
064: Dice Coeff = 0.772016
0.772016
065: Dice Coeff = 0.883146
0.883146
066: Dice Coeff = 0.777969
0.777969
067: Dice Coeff = 0.877657
0.877657
068: Dice Coeff = 0.890835
0.890835
069: Dice Coeff = 0.656906
0.656906
070: Dice Coeff = 0.847571
0.847571
071: Dice Coeff = 0.886680
0.886680
072: Dice Coeff = 0.879808
0.879808
073: Dice Coeff = 0.919774
0.919774
074: Dice Coeff = 0.883249
0.883249
075: Dice Coeff = 0.883227
0.883227
076: Dice Coeff = 0.756129
0.756129
077: Dice Coeff = 0.776952
0.776952
078: Dice Coeff = 0.723455
0.723455
079: Dice Coeff = 0.859091
0.859091
080: Dice Coeff = 0.799847
0.799847
081: Dice Coeff = 0.664356
0.664356
082: Dice Coeff = 0.594912
0.594912
083: Dice Coeff = 0.871935
0.871935
084: Dice Coeff = 0.805556
0.805556
085: Dice Coeff = 0.825137
0.825137
086: Dice Coeff = 0.765543
0.765543
087: Dice Coeff = 0.671551
0.671551
088: Dice Coeff = 0.910369
0.910369
089: Dice Coeff = 0.857790
0.857790
090: Dice Coeff = 0.822238
0.822238
091: Dice Coeff = 0.907486
0.907486
092: Dice Coeff = 0.930616
0.930616
093: Dice Coeff = 0.809422
0.809422
094: Dice Coeff = 0.919271
0.919271
095: Dice Coeff = 0.619883
0.619883
096: Dice Coeff = 0.788000
0.788000
097: Dice Coeff = 0.865072
0.865072
098: Dice Coeff = 0.825737
0.825737
099: Dice Coeff = 0.795556
0.795556
100: Dice Coeff = 0.784409
0.784409
101: Dice Coeff = 0.698639
0.698639
102: Dice Coeff = 0.607725
0.607725
103: Dice Coeff = 0.802834
0.802834
104: Dice Coeff = 0.651828
0.651828
105: Dice Coeff = 0.747145
0.747145
106: Dice Coeff = 0.665789
0.665789
107: Dice Coeff = 0.838870
0.838870
108: Dice Coeff = 0.751890
0.751890
109: Dice Coeff = 0.667461
0.667461
110: Dice Coeff = 0.823529
0.823529
111: Dice Coeff = 0.896209
0.896209
112: Dice Coeff = 0.756225
0.756225
113: Dice Coeff = 0.931507
0.931507
114: Dice Coeff = 0.905356
0.905356
115: Dice Coeff = 0.559767
0.559767
116: Dice Coeff = 0.899866
0.899866
117: Dice Coeff = 0.890667
0.890667
118: Dice Coeff = 0.883295
0.883295
119: Dice Coeff = 0.944444
0.944444
120: Dice Coeff = 0.887139
0.887139
121: Dice Coeff = 0.796380
0.796380
122: Dice Coeff = 0.890019
0.890019
123: Dice Coeff = 0.879855
0.879855
124: Dice Coeff = 0.856378
0.856378
125: Dice Coeff = 0.817043
0.817043
126: Dice Coeff = 0.729483
0.729483
127: Dice Coeff = 0.921273
0.921273
128: Dice Coeff = 0.640100
0.640100
129: Dice Coeff = 0.840090
0.840090
130: Dice Coeff = 0.863362
0.863362
131: Dice Coeff = 0.898907
0.898907
132: Dice Coeff = 0.917836
0.917836
133: Dice Coeff = 0.874610
0.874610
134: Dice Coeff = 0.783547
0.783547
135: Dice Coeff = 0.844195
0.844195
136: Dice Coeff = 0.899041
0.899041
137: Dice Coeff = 0.080560
0.080560
138: Dice Coeff = 0.946648
0.946648
139: Dice Coeff = 0.876344
0.876344
140: Dice Coeff = 0.911796
0.911796
141: Dice Coeff = 0.456825
0.456825
142: Dice Coeff = 0.739935
0.739935
143: Dice Coeff = 0.872727
0.872727
144: Dice Coeff = 0.883632
0.883632
145: Dice Coeff = 0.875385
0.875385
146: Dice Coeff = 0.849244
0.849244
147: Dice Coeff = 0.927921
0.927921
148: Dice Coeff = 0.823117
0.823117
149: Dice Coeff = 0.830075
0.830075
150: Dice Coeff = 0.808717
0.808717
151: Dice Coeff = 0.762510
0.762510
152: Dice Coeff = 0.823529
0.823529
153: Dice Coeff = 0.624733
0.624733
154: Dice Coeff = 0.869369
0.869369
155: Dice Coeff = 0.775606
0.775606
156: Dice Coeff = 0.815890
0.815890
157: Dice Coeff = 0.648199
0.648199
158: Dice Coeff = 0.916843
0.916843
159: Dice Coeff = 0.723898
0.723898
160: Dice Coeff = 0.788413
0.788413
161: Dice Coeff = 0.740883
0.740883
162: Dice Coeff = 0.879640
0.879640
163: Dice Coeff = 0.857143
0.857143
164: Dice Coeff = 0.772983
0.772983
165: Dice Coeff = 0.609808
0.609808
166: Dice Coeff = 0.841823
0.841823
167: Dice Coeff = 0.828135
0.828135
168: Dice Coeff = 0.757489
0.757489
169: Dice Coeff = 0.848546
0.848546
170: Dice Coeff = 0.669903
0.669903
171: Dice Coeff = 0.778281
0.778281
172: Dice Coeff = 0.780220
0.780220
173: Dice Coeff = 0.854902
0.854902
174: Dice Coeff = 0.905547
0.905547
175: Dice Coeff = 0.856525
0.856525
176: Dice Coeff = 0.748428
0.748428
177: Dice Coeff = 0.894376
0.894376
178: Dice Coeff = 0.796584
0.796584
179: Dice Coeff = 0.806826
0.806826
180: Dice Coeff = 0.807773
0.807773
181: Dice Coeff = 0.865385
0.865385
182: Dice Coeff = 0.736739
0.736739
183: Dice Coeff = 0.535211
0.535211
184: Dice Coeff = 0.782145
0.782145
185: Dice Coeff = 0.839339
0.839339
186: Dice Coeff = 0.720000
0.720000
187: Dice Coeff = 0.696219
0.696219
188: Dice Coeff = 0.833834
0.833834
189: Dice Coeff = 0.768212
0.768212
190: Dice Coeff = 0.698669
0.698669
191: Dice Coeff = 0.805643
0.805643
192: Dice Coeff = 0.350140
0.350140
193: Dice Coeff = 0.785789
0.785789
194: Dice Coeff = 0.336066
0.336066
195: Dice Coeff = 0.717557
0.717557
196: Dice Coeff = 0.838095
0.838095
197: Dice Coeff = 0.779968
0.779968
198: Dice Coeff = 0.843900
0.843900
199: Dice Coeff = 0.854421
0.854421
200: Dice Coeff = 0.857510
0.857510
201: Dice Coeff = 0.818452
0.818452
202: Dice Coeff = 0.657277
0.657277
203: Dice Coeff = 0.829305
0.829305
204: Dice Coeff = 0.772000
0.772000
205: Dice Coeff = 0.904239
0.904239
206: Dice Coeff = 0.875817
0.875817
207: Dice Coeff = 0.931891
0.931891
208: Dice Coeff = 0.802521
0.802521
209: Dice Coeff = 0.898026
0.898026
210: Dice Coeff = 0.909520
0.909520
211: Dice Coeff = 0.942316
0.942316
212: Dice Coeff = 0.757692
0.757692
213: Dice Coeff = 0.798403
0.798403
214: Dice Coeff = 0.837675
0.837675
215: Dice Coeff = 0.933468
0.933468
216: Dice Coeff = 0.720497
0.720497
217: Dice Coeff = 0.804401
0.804401
218: Dice Coeff = 0.821951
0.821951
219: Dice Coeff = 0.905035
0.905035
220: Dice Coeff = 0.850649
0.850649
221: Dice Coeff = 0.898236
0.898236
222: Dice Coeff = 0.781145
0.781145
223: Dice Coeff = 0.870990
0.870990
224: Dice Coeff = 0.767947
0.767947
225: Dice Coeff = 0.845390
0.845390
226: Dice Coeff = 0.757372
0.757372
227: Dice Coeff = 0.867446
0.867446
228: Dice Coeff = 0.744257
0.744257
229: Dice Coeff = 0.848580
0.848580
230: Dice Coeff = 0.869425
0.869425
231: Dice Coeff = 0.942931
0.942931
232: Dice Coeff = 0.694387
0.694387
233: Dice Coeff = 0.807080
0.807080
234: Dice Coeff = 0.866787
0.866787
235: Dice Coeff = 0.927265
0.927265
236: Dice Coeff = 0.710480
0.710480
237: Dice Coeff = 0.622500
0.622500
238: Dice Coeff = 0.867442
0.867442
239: Dice Coeff = 0.897426
0.897426
240: Dice Coeff = 0.855754
0.855754
241: Dice Coeff = 0.587838
0.587838
242: Dice Coeff = 0.639437
0.639437
243: Dice Coeff = 0.860733
0.860733
244: Dice Coeff = 0.885167
0.885167
245: Dice Coeff = 0.934150
0.934150
246: Dice Coeff = 0.884163
0.884163
247: Dice Coeff = 0.900148
0.900148
248: Dice Coeff = 0.880597
0.880597
249: Dice Coeff = 0.833333
0.833333
250: Dice Coeff = 0.811944
0.811944
251: Dice Coeff = 0.834050
0.834050
252: Dice Coeff = 0.823308
0.823308
253: Dice Coeff = 0.772932
0.772932
254: Dice Coeff = 0.797896
0.797896
255: Dice Coeff = 0.692191
0.692191
256: Dice Coeff = 0.801782
0.801782
257: Dice Coeff = 0.910638
0.910638
258: Dice Coeff = 0.642100
0.642100
259: Dice Coeff = 0.867635
0.867635
260: Dice Coeff = 0.560113
0.560113
261: Dice Coeff = 0.658397
0.658397
262: Dice Coeff = 0.760504
0.760504
263: Dice Coeff = 0.772215
0.772215
264: Dice Coeff = 0.517572
0.517572
265: Dice Coeff = 0.831606
0.831606
266: Dice Coeff = 0.660652
0.660652
267: Dice Coeff = 0.758245
0.758245
268: Dice Coeff = 0.360577
0.360577
269: Dice Coeff = 0.896346
0.896346
Dice eval av. : 0.813215
In [22]:
#
#   Show History
#
mode = "SHOW_HISTORY"
if mode == "SHOW_HISTORY":
    # Load the pickled Keras history dict saved during training.
    print(dname_checkpoints + '/' + fname_history)
    # FIX: the original passed a bare open() directly to pickle.load, leaking
    # the file handle; a context manager guarantees it is closed.
    # NOTE(review): pickle.load executes arbitrary code — only load history
    # files this notebook itself produced.
    with open(dname_checkpoints + '/' + fname_history, 'rb') as f:
        history = pickle.load(f)

    # Plot each recorded metric (loss, dice_coef, val_*) over epochs.
    for k in history.keys():
        plt.plot(history[k])
        plt.title(k)
        plt.show()
checkpoints_fcn01/history.pkl